bitkeeper revision 1.259.2.3 (3f0ad1f5bs1q4bh_4dbAVVvBLeoAlw)
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 8 Jul 2003 14:15:17 +0000 (14:15 +0000)
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 8 Jul 2003 14:15:17 +0000 (14:15 +0000)
Many files:
  Fix auto destruction of direct-mapped vm areas.
.del-mmu_context.c~74789121d58c5b63:
  Delete: xenolinux-2.4.21-sparse/arch/xeno/mm/mmu_context.c

.rootkeys
xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_core.c
xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c
xenolinux-2.4.21-sparse/arch/xeno/kernel/process.c
xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile
xenolinux-2.4.21-sparse/arch/xeno/mm/get_unmapped_area.c
xenolinux-2.4.21-sparse/arch/xeno/mm/mmu_context.c [deleted file]
xenolinux-2.4.21-sparse/include/asm-xeno/mmu_context.h

index d659f6d810e301492da6742574c3adcfb674a27a..684ca66f3939c3b14ff3d8715b8eb53dc968a343 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3e5a4e66TyNNUEXkr5RxqvQhXK1MQA xenolinux-2.4.21-sparse/arch/xeno/mm/get_unmapped_area.c
 3e5a4e668SE9rixq4ahho9rNhLUUFQ xenolinux-2.4.21-sparse/arch/xeno/mm/hypervisor.c
 3e5a4e661gLzzff25pJooKIIWe7IWg xenolinux-2.4.21-sparse/arch/xeno/mm/init.c
-3e5a4e66U45cAIoHmxg0y1e1XhzVCA xenolinux-2.4.21-sparse/arch/xeno/mm/mmu_context.c
 3e5a4e66qRlSTcjafidMB6ulECADvg xenolinux-2.4.21-sparse/arch/xeno/vmlinux.lds
 3ea53c6em6uzVHSiGqrbbAVofyRY_g xenolinux-2.4.21-sparse/drivers/block/genhd.c
 3e5a4e66mrtlmV75L1tjKDg8RaM5gA xenolinux-2.4.21-sparse/drivers/block/ll_rw_blk.c
index 8356992f2f3e6dd424db3fc961ac6d95ddea4b60..3377c3b3a6672bd9aad6d200047138172094c2e9 100644 (file)
@@ -52,7 +52,6 @@ static struct proc_dir_entry *dom_list_intf;
 
 unsigned long direct_mmap(unsigned long, unsigned long, pgprot_t, int, int);
 int direct_unmap(unsigned long, unsigned long);
-int direct_disc_unmap(unsigned long, unsigned long, int);
 
 static unsigned char readbuf[1204];
 
@@ -161,8 +160,8 @@ static ssize_t dom_mem_write(struct file * file, const char * buff,
     
     copy_from_user(&mem_data, (dom_mem_t *)buff, sizeof(dom_mem_t));
     
-    if(direct_disc_unmap(mem_data.vaddr, mem_data.start_pfn,
-                         mem_data.tot_pages) == 0){
+    if ( direct_unmap(mem_data.vaddr,
+                      mem_data.tot_pages << PAGE_SHIFT) == 0 ) {
         return sizeof(sizeof(dom_mem_t));
     } else {
         return -1;
index 9d14070a1e6d4de6154fe95237a468f0990b7cda..8b1ffcd5f35b3bfa5bdb464b5764dd425d772684 100644 (file)
@@ -44,79 +44,79 @@ static inline void forget_pte(pte_t page)
 }
 
 static inline void direct_remappte_range(pte_t * pte, unsigned long address, unsigned long size,
-       unsigned long phys_addr, pgprot_t prot)
+                                         unsigned long phys_addr, pgprot_t prot)
 {
-       unsigned long end;
+    unsigned long end;
 
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       do {
-               pte_t oldpage;
-               oldpage = ptep_get_and_clear(pte);
+    address &= ~PMD_MASK;
+    end = address + size;
+    if (end > PMD_SIZE)
+        end = PMD_SIZE;
+    do {
+        pte_t oldpage;
+        oldpage = ptep_get_and_clear(pte);
 
-               direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
+        direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
 
-               forget_pte(oldpage);
-               address += PAGE_SIZE;
-               phys_addr += PAGE_SIZE;
-               pte++;
-       } while (address && (address < end));
+        forget_pte(oldpage);
+        address += PAGE_SIZE;
+        phys_addr += PAGE_SIZE;
+        pte++;
+    } while (address && (address < end));
 
 }
 
 static inline int direct_remappmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-       unsigned long phys_addr, pgprot_t prot)
+                                        unsigned long phys_addr, pgprot_t prot)
 {
-       unsigned long end;
-
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       phys_addr -= address;
-       do {
-               pte_t * pte = pte_alloc(mm, pmd, address);
-               if (!pte)
-                       return -ENOMEM;
-               direct_remappte_range(pte, address, end - address, address + phys_addr, prot);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address && (address < end));
-       return 0;
+    unsigned long end;
+
+    address &= ~PGDIR_MASK;
+    end = address + size;
+    if (end > PGDIR_SIZE)
+        end = PGDIR_SIZE;
+    phys_addr -= address;
+    do {
+        pte_t * pte = pte_alloc(mm, pmd, address);
+        if (!pte)
+            return -ENOMEM;
+        direct_remappte_range(pte, address, end - address, address + phys_addr, prot);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address && (address < end));
+    return 0;
 }
 
 /*  Note: this is only safe if the mm semaphore is held when called. */
 int direct_remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
-       int error = 0;
-       pgd_t * dir;
-       unsigned long beg = from;
-       unsigned long end = from + size;
-       struct mm_struct *mm = current->mm;
-
-       phys_addr -= from;
-       dir = pgd_offset(mm, from);
-       flush_cache_range(mm, beg, end);
-       if (from >= end)
-               BUG();
-
-       spin_lock(&mm->page_table_lock);
-       do {
-               pmd_t *pmd = pmd_alloc(mm, dir, from);
-               error = -ENOMEM;
-               if (!pmd)
-                       break;
-               error = direct_remappmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
-               if (error)
-                       break;
-               from = (from + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (from && (from < end));
-       spin_unlock(&mm->page_table_lock);
-       flush_tlb_range(mm, beg, end);
-       return error;
+    int error = 0;
+    pgd_t * dir;
+    unsigned long beg = from;
+    unsigned long end = from + size;
+    struct mm_struct *mm = current->mm;
+
+    phys_addr -= from;
+    dir = pgd_offset(mm, from);
+    flush_cache_range(mm, beg, end);
+    if (from >= end)
+        BUG();
+
+    spin_lock(&mm->page_table_lock);
+    do {
+        pmd_t *pmd = pmd_alloc(mm, dir, from);
+        error = -ENOMEM;
+        if (!pmd)
+            break;
+        error = direct_remappmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
+        if (error)
+            break;
+        from = (from + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (from && (from < end));
+    spin_unlock(&mm->page_table_lock);
+    flush_tlb_range(mm, beg, end);
+    return error;
 }
 
 /* 
@@ -124,7 +124,7 @@ int direct_remap_page_range(unsigned long from, unsigned long phys_addr, unsigne
  * found from frame table beginning at the given first_pg index
  */ 
 int direct_remap_disc_page_range(unsigned long from, 
-                unsigned long first_pg, int tot_pages, pgprot_t prot)
+                                 unsigned long first_pg, int tot_pages, pgprot_t prot)
 {
     dom0_op_t dom0_op;
     unsigned long *pfns = get_free_page(GFP_KERNEL);
@@ -153,7 +153,7 @@ int direct_remap_disc_page_range(unsigned long from,
         }
     }
 
-out:
+ out:
     free_page(pfns);
     return tot_pages;
 } 
@@ -165,7 +165,7 @@ out:
  */
 
 unsigned long direct_mmap(unsigned long phys_addr, unsigned long size, 
-                pgprot_t prot, int flag, int tot_pages)
+                          pgprot_t prot, int flag, int tot_pages)
 {
     direct_mmap_node_t * dmmap;
     struct list_head * entry;
@@ -190,17 +190,17 @@ unsigned long direct_mmap(unsigned long phys_addr, unsigned long size,
     dmmap = (direct_mmap_node_t *)kmalloc(sizeof(direct_mmap_node_t), GFP_KERNEL);
     dmmap->vm_start = addr;
     dmmap->vm_end = addr + size;
-       entry = find_direct(&current->mm->context.direct_list, addr);
-       if(entry != &current->mm->context.direct_list){
-               list_add_tail(&dmmap->list, entry);
-       } else {
+    entry = find_direct(&current->mm->context.direct_list, addr);
+    if(entry != &current->mm->context.direct_list){
+        list_add_tail(&dmmap->list, entry);
+    } else {
        list_add_tail(&dmmap->list, &current->mm->context.direct_list);
-       }
+    }
 
     /* and perform the mapping */
     if(flag == MAP_DISCONT){
         ret = direct_remap_disc_page_range(addr, phys_addr >> PAGE_SHIFT, 
-            tot_pages, prot);
+                                           tot_pages, prot);
     } else {
         ret = direct_remap_page_range(addr, phys_addr, size, prot);
     }
@@ -208,7 +208,7 @@ unsigned long direct_mmap(unsigned long phys_addr, unsigned long size,
     if(ret == 0)
         ret = addr;
 
-out: 
+ out: 
     return ret;
 }
 
@@ -216,60 +216,60 @@ out:
  * needed
  */
 static inline int direct_zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, 
-                unsigned long size)
+                                       unsigned long size)
 {
-       unsigned long offset;
-       pte_t * ptep;
-       int freed = 0;
-
-       if (pmd_none(*pmd))
-               return 0;
-       if (pmd_bad(*pmd)) {
-               pmd_ERROR(*pmd);
-               pmd_clear(pmd);
-               return 0;
-       }
-       ptep = pte_offset(pmd, address);
-       offset = address & ~PMD_MASK;
-       if (offset + size > PMD_SIZE)
-               size = PMD_SIZE - offset;
-       size &= PAGE_MASK;
-       for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
-               pte_t pte = *ptep;
-               if (pte_none(pte))
-                       continue;
-               freed ++;
-               direct_pte_clear(ptep);
-       }
-
-       return freed;
+    unsigned long offset;
+    pte_t * ptep;
+    int freed = 0;
+
+    if (pmd_none(*pmd))
+        return 0;
+    if (pmd_bad(*pmd)) {
+        pmd_ERROR(*pmd);
+        pmd_clear(pmd);
+        return 0;
+    }
+    ptep = pte_offset(pmd, address);
+    offset = address & ~PMD_MASK;
+    if (offset + size > PMD_SIZE)
+        size = PMD_SIZE - offset;
+    size &= PAGE_MASK;
+    for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+        pte_t pte = *ptep;
+        if (pte_none(pte))
+            continue;
+        freed ++;
+        direct_pte_clear(ptep);
+    }
+
+    return freed;
 }
 
 static inline int direct_zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, 
-                unsigned long address, unsigned long size)
+                                       unsigned long address, unsigned long size)
 {
-       pmd_t * pmd;
-       unsigned long end;
-       int freed;
-
-       if (pgd_none(*dir))
-               return 0;
-       if (pgd_bad(*dir)) {
-               pgd_ERROR(*dir);
-               pgd_clear(dir);
-               return 0;
-       }
-       pmd = pmd_offset(dir, address);
-       end = address + size;
-       if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
-               end = ((address + PGDIR_SIZE) & PGDIR_MASK);
-       freed = 0;
-       do {
-               freed += direct_zap_pte_range(tlb, pmd, address, end - address);
-               address = (address + PMD_SIZE) & PMD_MASK; 
-               pmd++;
-       } while (address < end);
-       return freed;
+    pmd_t * pmd;
+    unsigned long end;
+    int freed;
+
+    if (pgd_none(*dir))
+        return 0;
+    if (pgd_bad(*dir)) {
+        pgd_ERROR(*dir);
+        pgd_clear(dir);
+        return 0;
+    }
+    pmd = pmd_offset(dir, address);
+    end = address + size;
+    if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+        end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+    freed = 0;
+    do {
+        freed += direct_zap_pte_range(tlb, pmd, address, end - address);
+        address = (address + PMD_SIZE) & PMD_MASK; 
+        pmd++;
+    } while (address < end);
+    return freed;
 }
 
 /*
@@ -277,91 +277,67 @@ static inline int direct_zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir,
  */
 void direct_zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
 {
-       mmu_gather_t *tlb;
-       pgd_t * dir;
-       unsigned long start = address, end = address + size;
-       int freed = 0;
-
-       dir = pgd_offset(mm, address);
-
-       /*
-        * This is a long-lived spinlock. That's fine.
-        * There's no contention, because the page table
-        * lock only protects against kswapd anyway, and
-        * even if kswapd happened to be looking at this
-        * process we _want_ it to get stuck.
-        */
-       if (address >= end)
-               BUG();
-       spin_lock(&mm->page_table_lock);
-       flush_cache_range(mm, address, end);
-       tlb = tlb_gather_mmu(mm);
-
-       do {
-               freed += direct_zap_pmd_range(tlb, dir, address, end - address);
-               address = (address + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (address && (address < end));
-
-       /* this will flush any remaining tlb entries */
-       tlb_finish_mmu(tlb, start, end);
-
-    /* decrementing rss removed */
-
-       spin_unlock(&mm->page_table_lock);
-}
-
-int direct_unmap(unsigned long addr, unsigned long size)
-{
-    direct_mmap_node_t * node;
-    struct list_head * curr;
-    struct list_head * direct_list = &current->mm->context.direct_list;    
+    mmu_gather_t *tlb;
+    pgd_t * dir;
+    unsigned long start = address, end = address + size;
+    int freed = 0;
+
+    dir = pgd_offset(mm, address);
+
+    /*
+     * This is a long-lived spinlock. That's fine.
+     * There's no contention, because the page table
+     * lock only protects against kswapd anyway, and
+     * even if kswapd happened to be looking at this
+     * process we _want_ it to get stuck.
+     */
+    if (address >= end)
+        BUG();
+    spin_lock(&mm->page_table_lock);
+    flush_cache_range(mm, address, end);
+    tlb = tlb_gather_mmu(mm);
 
-    curr = direct_list->next;
-    while(curr != direct_list){
-        node = list_entry(curr, direct_mmap_node_t, list);
-        if(node->vm_start == addr)
-            break;
-        curr = curr->next;
-    }
+    do {
+        freed += direct_zap_pmd_range(tlb, dir, address, end - address);
+        address = (address + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (address && (address < end));
 
-    if(curr == direct_list)
-        return -1;
+    /* this will flush any remaining tlb entries */
+    tlb_finish_mmu(tlb, start, end);
 
-    list_del(&node->list);
-    kfree(node);
-
-    direct_zap_page_range(current->mm, addr, size);
-    return 0;
+    /* decrementing rss removed */
+    spin_unlock(&mm->page_table_lock);
 }
 
-int direct_disc_unmap(unsigned long from, unsigned long first_pg, int tot_pages)
+
+int direct_unmap(unsigned long addr, unsigned long size)
 {
-    int count = 0;
+    int count = 0, tot_pages = (size+PAGE_SIZE-1) >> PAGE_SHIFT;
     direct_mmap_node_t * node;
     struct list_head * curr;
     struct list_head * direct_list = &current->mm->context.direct_list;    
 
     curr = direct_list->next;
-    while(curr != direct_list){
+    while ( curr != direct_list )
+    {
         node = list_entry(curr, direct_mmap_node_t, list);
-
-        if(node->vm_start == from)
+        if ( node->vm_start == addr )
             break;
         curr = curr->next;
     }
 
-    if(curr == direct_list)
+    if ( curr == direct_list )
         return -1;
 
     list_del(&node->list);
     kfree(node);
 
-    while(count < tot_pages){
-            direct_zap_page_range(current->mm, from, PAGE_SIZE);
-            from += PAGE_SIZE;
-            count++;
+    while ( count < tot_pages )
+    {
+        direct_zap_page_range(current->mm, addr, PAGE_SIZE);
+        addr += PAGE_SIZE;
+        count++;
     }
 
     return 0;
index 86f8e680f4e06b219b312584c91ef81c3fa48388..f91df8f6a76024075846dc7594a71539ce8107dc 100644 (file)
@@ -145,6 +145,9 @@ void release_segments(struct mm_struct *mm)
         flush_page_update_queue();
         vfree(ldt);
     }
+
+    /* YUK! We do this here because destroy_context() is too late. */
+    destroy_direct_list(mm);
 }
 
 /*
index 9ab0821b2ef72fbdbfb5962a7082e5c1b0d17f0c..33091ac78d219786eace0630105601742994e7e7 100644 (file)
@@ -9,7 +9,7 @@
 
 O_TARGET := mm.o
 
-obj-y   := init.o fault.o extable.o pageattr.o hypervisor.o get_unmapped_area.o mmu_context.o
+obj-y   := init.o fault.o extable.o pageattr.o hypervisor.o get_unmapped_area.o
 
 export-objs := pageattr.o
 
index 708a2b72814e43ab223d9035bcccee45f745b2dd..c4b322f9bc6f433cc0af8935fdb070d673b8ef0d 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
+extern int direct_unmap(unsigned long, unsigned long);
+
+
+int init_direct_list(struct mm_struct *mm)
+{
+    INIT_LIST_HEAD(&mm->context.direct_list);
+    return 0;
+}
+
+
+void destroy_direct_list(struct mm_struct *mm)
+{
+    struct list_head *curr, *direct_list = &mm->context.direct_list;
+    while ( (curr = direct_list->next) != direct_list )
+    {
+        direct_mmap_node_t *node = list_entry(curr, direct_mmap_node_t, list);
+        if ( direct_unmap(node->vm_start, node->vm_end - node->vm_start) != 0 )
+            BUG();
+    }
+}
+
+
 struct list_head *find_direct(struct list_head *list, unsigned long addr)
 {
     struct list_head * curr;
@@ -29,6 +51,7 @@ struct list_head *find_direct(struct list_head *list, unsigned long addr)
     return curr;
 }
 
+
 unsigned long arch_get_unmapped_area(struct file *filp, 
                                      unsigned long addr, 
                                      unsigned long len, 
diff --git a/xenolinux-2.4.21-sparse/arch/xeno/mm/mmu_context.c b/xenolinux-2.4.21-sparse/arch/xeno/mm/mmu_context.c
deleted file mode 100644 (file)
index b8f41fb..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-
-#include <linux/slab.h>
-#include <linux/list.h>
-
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-    INIT_LIST_HEAD(&mm->context.direct_list);
-    return 0;
-}
-
-/* just free all elements of list identifying directly mapped areas */
-void destroy_context(struct mm_struct *mm)
-{
-    direct_mmap_node_t * node;
-    struct list_head * curr;
-    struct list_head * direct_list = &mm->context.direct_list;
-
-    curr = direct_list->next;
-    while(curr != direct_list){
-        node = list_entry(curr, direct_mmap_node_t, list);
-        curr = curr->next;
-        list_del(&node->list);
-        kfree(node);
-   }
-
-}
index cdf5319f486d1b7c03b18550533d24fb59ca2064..83514ebd14d9201aa8201c1aa502b9611ccc1ab5 100644 (file)
@@ -7,15 +7,13 @@
 #include <asm/pgalloc.h>
 #include <asm/multicall.h>
 
-/*
- * possibly do the LDT unload here?
- */
+/* Hooked directly from 'init_new_context'. */
+extern int init_direct_list(struct mm_struct *);
+/* Called from 'release_segments'. */
+extern void destroy_direct_list(struct mm_struct *);
 
-extern int init_new_context(struct task_struct *tsk, struct mm_struct *);
-extern void destroy_context(struct mm_struct *);
-
-//#define destroy_context(mm)          do { } while(0)
-//#define init_new_context(tsk,mm)     0
+#define destroy_context(mm)            do { } while(0)
+#define init_new_context(tsk,mm)       init_direct_list(mm)
 
 #ifdef CONFIG_SMP